Fix alloc_skb() to ensure data is always physically contiguous.
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 8 Nov 2005 15:48:42 +0000 (16:48 +0100)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Tue, 8 Nov 2005 15:48:42 +0000 (16:48 +0100)
Signed-off-by: Keir Fraser <keir@xensource.com>
linux-2.6-xen-sparse/arch/xen/Kconfig
linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_32
linux-2.6-xen-sparse/arch/xen/configs/xen0_defconfig_x86_64
linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_32
linux-2.6-xen-sparse/arch/xen/configs/xenU_defconfig_x86_64
linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_32
linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64
linux-2.6-xen-sparse/arch/xen/kernel/skbuff.c
linux-2.6-xen-sparse/net/core/skbuff.c

index b03a8b0d7c59da6269e16fc1d21599b0eeaeaa93..790c2d82e1f88824e3a3386561c5a8c6438cc418 100644 (file)
@@ -173,6 +173,10 @@ endchoice
 
 endmenu
 
+config HAVE_ARCH_ALLOC_SKB
+       bool
+       default y
+
 config HAVE_ARCH_DEV_ALLOC_SKB
        bool
        default y
index ef780d693a09b98fd9c1c0fa5c5b6cb784daae23..bae115ecc7ee1386fd84e9155b5498c1e23dffe4 100644 (file)
@@ -25,6 +25,7 @@ CONFIG_XEN_NETDEV_FRONTEND=y
 CONFIG_XEN_SCRUB_PAGES=y
 CONFIG_XEN_X86=y
 # CONFIG_XEN_X86_64 is not set
+CONFIG_HAVE_ARCH_ALLOC_SKB=y
 CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
 
 #
index eea42fed6e7650f6ba33f416c4d33f29482be1ad..cb54da3b0e6da59436c9afe1a13d16b37a8288f3 100644 (file)
@@ -25,6 +25,7 @@ CONFIG_XEN_NETDEV_FRONTEND=y
 CONFIG_XEN_SCRUB_PAGES=y
 # CONFIG_XEN_X86 is not set
 CONFIG_XEN_X86_64=y
+CONFIG_HAVE_ARCH_ALLOC_SKB=y
 CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
 
 #
index 1e1c577008001af8741001cf0574dc1e0df65552..0715a553615cfab6256ceacab05e142f181e2297 100644 (file)
@@ -22,6 +22,7 @@ CONFIG_XEN_NETDEV_FRONTEND=y
 CONFIG_XEN_SCRUB_PAGES=y
 CONFIG_XEN_X86=y
 # CONFIG_XEN_X86_64 is not set
+CONFIG_HAVE_ARCH_ALLOC_SKB=y
 CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
 
 #
index 1547c05a989089ad634d7d03576925475d6f4ba1..87ca4d45e1a494dd88484ce1d01a645d5121157d 100644 (file)
@@ -22,6 +22,7 @@ CONFIG_XEN_NETDEV_FRONTEND=y
 CONFIG_XEN_SCRUB_PAGES=y
 # CONFIG_XEN_X86 is not set
 CONFIG_XEN_X86_64=y
+CONFIG_HAVE_ARCH_ALLOC_SKB=y
 CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
 
 #
index 63e1d4cc9c314e61ae5af386d02749bf0f8dc14b..d7e9af6b784b553b0d2a9227645297d6c3b2091f 100644 (file)
@@ -25,6 +25,7 @@ CONFIG_XEN_NETDEV_FRONTEND=y
 CONFIG_XEN_SCRUB_PAGES=y
 CONFIG_XEN_X86=y
 # CONFIG_XEN_X86_64 is not set
+CONFIG_HAVE_ARCH_ALLOC_SKB=y
 CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
 
 #
index 10dcc41c0437bbc1df1ff4ec28c4b836afbc4f2b..4a14a2d9ba9797fb1302be971647e9d30d8405d4 100644 (file)
@@ -25,6 +25,7 @@ CONFIG_XEN_NETDEV_FRONTEND=y
 CONFIG_XEN_SCRUB_PAGES=y
 # CONFIG_XEN_X86 is not set
 CONFIG_XEN_X86_64=y
+CONFIG_HAVE_ARCH_ALLOC_SKB=y
 CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
 
 #
index 1a1d1131867bd884d95a3ef47722841b7ec1d4c2..e1cd013e247bdfbe0860965740fd1b87c0f4a5df 100644 (file)
 #define MAX_SKBUFF_ORDER 2
 static kmem_cache_t *skbuff_order_cachep[MAX_SKBUFF_ORDER + 1];
 
+static struct {
+       int size;
+       kmem_cache_t *cachep;
+} skbuff_small[] = { { 512, NULL }, { 2048, NULL } };
+
+struct sk_buff *alloc_skb(unsigned int length, int gfp_mask)
+{
+       int order, i;
+       kmem_cache_t *cachep;
+
+       length = SKB_DATA_ALIGN(length) + sizeof(struct skb_shared_info);
+
+       if (length <= skbuff_small[ARRAY_SIZE(skbuff_small)-1].size) {
+               for (i = 0; skbuff_small[i].size < length; i++)
+                       continue;
+               cachep = skbuff_small[i].cachep;
+       } else {
+               order = get_order(length);
+               if (order > MAX_SKBUFF_ORDER) {
+                       printk(KERN_ALERT "Attempt to allocate order %d "
+                              "skbuff. Increase MAX_SKBUFF_ORDER.\n", order);
+                       return NULL;
+               }
+               cachep = skbuff_order_cachep[order];
+       }
+
+       length -= sizeof(struct skb_shared_info);
+
+       return alloc_skb_from_cache(cachep, length, gfp_mask);
+}
+
 struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask)
 {
        struct sk_buff *skb;
@@ -68,8 +99,20 @@ static void skbuff_dtor(void *buf, kmem_cache_t *cachep, unsigned long unused)
 static int __init skbuff_init(void)
 {
        static char name[MAX_SKBUFF_ORDER + 1][20];
+       static char small_name[ARRAY_SIZE(skbuff_small)][20];
        unsigned long size;
-       int order;
+       int i, order;
+
+       for (i = 0; i < ARRAY_SIZE(skbuff_small); i++) {
+               size = skbuff_small[i].size;
+               sprintf(small_name[i], "xen-skb-%lu", size);
+               /*
+                * No ctor/dtor: objects do not span page boundaries, and they
+                * are only used on transmit path so no need for scrubbing.
+                */
+               skbuff_small[i].cachep = kmem_cache_create(
+                       small_name[i], size, size, 0, NULL, NULL);
+       }
 
        for (order = 0; order <= MAX_SKBUFF_ORDER; order++) {
                size = PAGE_SIZE << order;
@@ -82,7 +125,7 @@ static int __init skbuff_init(void)
 
        return 0;
 }
-__initcall(skbuff_init);
+core_initcall(skbuff_init);
 
 EXPORT_SYMBOL(__dev_alloc_skb);
 
index 9e144aa414dbfa152de2259084cab907311d6a80..5f7d3503c4720ede24e9959ccbbb085f77ec78f0 100644 (file)
@@ -129,6 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
  *     Buffers may only be allocated from interrupts using a @gfp_mask of
  *     %GFP_ATOMIC.
  */
+#ifndef CONFIG_HAVE_ARCH_ALLOC_SKB
 struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
 {
        struct sk_buff *skb;
@@ -166,6 +167,7 @@ nodata:
        skb = NULL;
        goto out;
 }
+#endif /* !CONFIG_HAVE_ARCH_ALLOC_SKB */
 
 /**
  *     alloc_skb_from_cache    -       allocate a network buffer